Self Driving Car Advanced Lane Line Finder

Objective

The objective is to detect lanes. These lanes will have curves within them in so it's not as simple as doing some thresholding and hough lines. The following image is the output that I will be trying to get. I will be able to detect lanes in images and videos.

image1

Overview

  • Calibrate Camera (compute matrix)
  • Distort corretion
  • Create threshold binary image
  • Apply perspective transform
  • Detect lane pixels
  • Detemine curvature
  • Warp line boundaries to image
  • Run pipeline on video
In [132]:
# Import packages
import cv2
import os
import numpy as np
import matplotlib.pyplot as plt
from moviepy.editor import VideoFileClip
from IPython.display import HTML

%matplotlib inline

Calibrate Camera

In [133]:
# Calibrates the camera that is used.
def calibrate_camera(img_cal_dir, nx=9, ny=6, show_images = False):
    
    # Create empty image and object points arrays
    obj_points = []
    img_points = []
    
    # Build object points
    objp = np.zeros((nx * ny, 3), np.float32)
    objp[:,:2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)
    
    # Runs through images in calibration directory
    for img_file in os.listdir(img_cal_dir):
        # Reads in image and converts to gray
        img = cv2.imread(os.path.join(img_cal_dir,img_file))
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        
        #Finds the chessboard corners and appends to object points and image points
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret == True:
            cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
            if show_images:
                plt.imshow(img)
                plt.show()
            
            obj_points.append(objp)
            img_points.append(corners)
    
    # if not emptly, return the output of calibrate camera
    if obj_points:
        return cv2.calibrateCamera(obj_points, img_points, gray.shape[::-1], None, None)
    else:
        return (None, None, None, None, None)

# Test
ret, mtx, dist, rvecs, tvecs = calibrate_camera("./camera_cal", show_images = True)
        
    

Undistort Images

In [134]:
# Test the values recieved from test and undistort images.  Distorted on Left and undistorted on the right 
def test_undistort(img_path):
    for img_file in os.listdir(img_path):
        img = cv2.imread(os.path.join(img_path,img_file))
        undistorted_image = cv2.undistort(img, mtx, dist, None, mtx)
        cv2.imwrite(os.path.join(img_path,img_file.replace(".jpg","_undistorted.jpg")),undistorted_image)
        
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
        ax1.imshow(img)
        ax2.imshow(undistorted_image, cmap='gray')
        
test_undistort("./camera_cal")
        
        
In [135]:
# Read in the image we will be testing on.
img = cv2.imread("./test_images/test1.jpg")
img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
img = cv2.undistort(img,mtx,dist,None,mtx)
cv2.imwrite(os.path.join('./',"test1.jpg".replace(".jpg","_undistorted.jpg")),img)
plt.imshow(img)
plt.show()
In [136]:
# Create the sobel threshold
def sobel_threshold(img, sobel_kernel=3, thresh=(0, 255), orientation=[1,0]):
    # Convert to gray scale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # perform the sobel on the gray scale image depending on the orientation that was selected
    sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, orientation[0], orientation[1], ksize=sobel_kernel))
    # scale the image
    scaled = np.uint8(255 * sobel/np.max(sobel))
    # create a blank binary output
    binary_output = np.zeros_like(scaled)
    # Turn everything to 1 that is within the threshold values
    binary_output[(scaled >= thresh[0]) & (scaled <= thresh[1])] = 1
    
    return binary_output
    
In [137]:
# Calculate the direction of the gradient
def calculate_direction(img, kernel=9, thresh=(0, np.pi/2)):
    # Convert to gray scale
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # Get the sobel in the x and y direction
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel)
    #Take the absolute of the x and y and the perform the arc tan to get the direction
    absolute_grad_dir = np.arctan2(np.absolute(sobely), np.absolute(sobelx))
    #Create the blank (all zero) output image then threshold
    binary_output =  np.zeros_like(absolute_grad_dir)
    binary_output[(absolute_grad_dir >= thresh[0]) & (absolute_grad_dir <= thresh[1])] = 1
    return binary_output
In [138]:
# Calculate the magnitude of the thresholding
def calculate_magnitude(img,kernel=9, mag_thresh=(0, 255)):
    # Convert to gray 
    gray = cv2.cvtColor(img,cv2.COLOR_RGB2GRAY)
    #Find x and y of sobel
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=kernel)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=kernel)
    # Calculate the magnitude
    mag = np.sqrt(sobelx**2 + sobely**2)
    # scale the magnitude output
    scale_factor = np.max(mag)/255 
    mag = (mag/scale_factor).astype(np.uint8) 
    # Create the binary image and thresholds it.
    binary_output = np.zeros_like(mag)
    binary_output[(mag >= mag_thresh[0]) & (mag <= mag_thresh[1])] = 1
    return binary_output
In [139]:
# Threshold on the saturation channel of hls
def calculate_threshold(img, thresh=(170,255)):
    hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
    # Get saturation channel
    s = hls[:,:,2]
    # Threshold it
    binary_output = np.zeros_like(s)
    binary_output[(s<=thresh[1]) & (s>=thresh[0])] = 1
    return binary_output
In [140]:
# Creates the pipline of all the above thresholds and combines them into one image
def threshold_pipeline(img):
    x_gradient = sobel_threshold(img, sobel_kernel=15, thresh=(30, 100), orientation=[1,0])
    y_gradient = sobel_threshold(img, sobel_kernel=15, thresh=(30, 100), orientation=[0,1])
    magnitude = calculate_magnitude(img, kernel=15, mag_thresh=(70, 110))
    direction = calculate_direction(img, kernel=15, thresh=(0.8, 1.4))
    s_channel_binary = calculate_threshold(img)
    
    binary = np.zeros_like(x_gradient)
    binary[((x_gradient==1) & (y_gradient==1)) | ((magnitude == 1) & (direction == 1)) | (s_channel_binary == 1)] = 1
    return binary

#Test
test = threshold_pipeline(img)
cv2.imwrite(os.path.join('./',"test1.jpg".replace(".jpg","_threshold.jpg")),test)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(img)
ax2.imshow(test, cmap='gray')
Out[140]:
<matplotlib.image.AxesImage at 0x7f3f9cdee6a0>
In [141]:
# This will warp the image to get a top down look
def warp_image(img, show_dots = False):
    width, height = (img.shape[1], img.shape[0])
    
    # points selected from the input image
    src_coords = np.float32(
        [[285,  720],  
         [590,  460],  
         [720,  460],  
         [1125, 720]])
    if show_dots:
        f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
        ax1.imshow(img)
        ax1.plot(src_coords[0][0],src_coords[0][1], '.')
        ax1.plot(src_coords[1][0],src_coords[1][1], '.')
        ax1.plot(src_coords[2][0],src_coords[2][1], '.')
        ax1.plot(src_coords[3][0],src_coords[3][1], '.')
    # Points to transform to  
    dst_coords = np.float32(
        [[250,  720],  
         [250,    0],  
         [1060,   0],  
         [1060, 720]])    
    
    # calucluate the transform matrix
    transform = cv2.getPerspectiveTransform(src_coords, dst_coords)
    
    # calculate the inverse transform matrix
    inverse_transform = cv2.getPerspectiveTransform(dst_coords, src_coords)
    
    # Warp image
    warped_image = cv2.warpPerspective(img, transform, (width,height), flags=cv2.INTER_LINEAR)
    
    if show_dots:
        ax2.imshow(warped_image)
        ax2.plot(dst_coords[0][0],dst_coords[0][1], '.')
        ax2.plot(dst_coords[1][0],dst_coords[1][1], '.')
        ax2.plot(dst_coords[2][0],dst_coords[2][1], '.')
        ax2.plot(dst_coords[3][0],dst_coords[3][1], '.')

    return warped_image, transform, inverse_transform
In [142]:
#Test
test_warped, M, Minv = warp_image(test, show_dots = True)
cv2.imwrite(os.path.join('./',"test1.jpg".replace(".jpg","_warped.jpg")),test_warped)
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(test,cmap="gray")
ax2.imshow(test_warped,cmap="gray")
Out[142]:
<matplotlib.image.AxesImage at 0x7f3f9cd17ba8>

Detect Lines

In [143]:
# Create a histogram with an input of the warped image
def hist(img):
    bottom_half = img[img.shape[0]//2:,:]
    histogram = np.sum(bottom_half, axis=0)
    return histogram
In [144]:
#Test
histogram = hist(test_warped)
plt.plot(histogram)
Out[144]:
[<matplotlib.lines.Line2D at 0x7f3f9cc0e1d0>]
In [145]:
def detect_lines(binary_warped, sliding_windows=10, margin=100, minpix=50, return_image = False):
    # Create histogram
    histogram = hist(binary_warped)
    
    #If return image create output
    if return_image:
            out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
    
    # Find the midpoint of the histogram shape.
    midpoint = np.int(histogram.shape[0]//2)
    # Finds the largest spike on the right and left side
    leftx_base = np.argmax(histogram[:midpoint])
    rightx_base = np.argmax(histogram[midpoint:]) + midpoint
    
    #Find the windo height based on the number of sliding windows
    window_height = np.int(binary_warped.shape[0]//sliding_windows)
    
    # Finds all the activated pixels
    nonzero = binary_warped.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    
    
    leftx_current = leftx_base
    rightx_current = rightx_base
    
    left_lane_inds = []
    right_lane_inds = []
    
    for window in range(sliding_windows):
        
        # create the window boundaries
        win_y_low = binary_warped.shape[0] - (window+1)*window_height
        win_y_high = binary_warped.shape[0] - window*window_height
        # Get the boundaries of the window
        win_xleft_low = leftx_current - margin
        win_xleft_high = leftx_current + margin
        win_xright_low = rightx_current - margin
        win_xright_high = rightx_current + margin 
        
        # Creates the window on the left and right lane
        if return_image:
            cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high), (0,255,0), 3) 
            cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high), (0,255,0), 3) 
        
        # find the non zero pixels in the window
        good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xleft_low) &  (nonzerox < win_xleft_high)).nonzero()[0]
        good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & 
        (nonzerox >= win_xright_low) &  (nonzerox < win_xright_high)).nonzero()[0]
        
        # append to list
        left_lane_inds.append(good_left_inds)
        right_lane_inds.append(good_right_inds)
        
        if len(good_left_inds) > minpix:
            leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
        if len(good_right_inds) > minpix:        
            rightx_current = np.int(np.mean(nonzerox[good_right_inds]))

    try:
        left_lane_inds = np.concatenate(left_lane_inds)
        right_lane_inds = np.concatenate(right_lane_inds)
    except ValueError:
        pass
    
    # Get pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    
    
    # use polyfit to fit a second order polynomial
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    
    # get x and y values for plotting
    ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
    try:
        left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
        right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    except TypeError:
        # Avoids an error if `left` and `right_fit` are still none or incorrect
        print('The function failed to fit a line!')
        left_fitx = 1*ploty**2 + 1*ploty
        right_fitx = 1*ploty**2 + 1*ploty
        
    # Check for return image.  If true the return image along with the lines.
    if return_image:
        # Color pixels within the window, blue for right and red for left
        out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
        out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
        for index in range(img.shape[0]):
            cv2.circle(out_img, (int(left_fitx[index]), int(ploty[index])), 3, (255,255,0))
            cv2.circle(out_img, (int(right_fitx[index]), int(ploty[index])), 3, (255,255,0))

        return (left_fit, right_fit), (left_fitx, ploty), (right_fitx, ploty), out_img
    
    return (left_fit, right_fit), (left_fitx, ploty), (right_fitx, ploty)

#test function

lines_fit, left_points, right_points, out_img = detect_lines(test_warped, return_image = True)
plt.imshow(out_img)
Out[145]:
<matplotlib.image.AxesImage at 0x7f3f9cb97208>
In [146]:
def detect_similar_lines(img, line_fits, return_image=True):
    
    if line_fits is None:
        return detect_lines(img, return_image)
    
    # get left and right line fits
    left_fit = line_fits[0]
    right_fit = line_fits[1]
    
    # get the none zeros in the image
    nonzero = img.nonzero()
    nonzeroy = np.array(nonzero[0])
    nonzerox = np.array(nonzero[1])
    margin = 100
    
    # Get the left and right lane indicies
    left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy + 
    left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) + 
    left_fit[1]*nonzeroy + left_fit[2] + margin))) 

    right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy + 
    right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) + 
    right_fit[1]*nonzeroy + right_fit[2] + margin)))  

    # Extract the left and right lane pixel positions
    leftx = nonzerox[left_lane_inds]
    lefty = nonzeroy[left_lane_inds] 
    rightx = nonzerox[right_lane_inds]
    righty = nonzeroy[right_lane_inds]
    
    if (leftx.size == 0 or rightx.size == 0):
        return detect_lines(img, return_img)
    
    # fit the polynomial to lines
    left_fit = np.polyfit(lefty, leftx, 2)
    right_fit = np.polyfit(righty, rightx, 2)
    
    ploty = np.linspace(0, img.shape[0] - 1, img.shape[0] )
    
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    
    # If no return image then just return points else continue to return image too
    if not return_image:
        return (left_fit, right_fit), (left_fitx, ploty), (right_fitx, ploty)
    out_img = np.dstack((img, img, img))*255
    window_img = np.zeros_like(out_img)

    left_line_window1 = np.array([np.transpose(np.vstack([left_fitx - margin, ploty]))])
    left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx + margin, ploty])))])
    left_line_pts = np.hstack((left_line_window1, left_line_window2))
    right_line_window1 = np.array([np.transpose(np.vstack([right_fitx - margin, ploty]))])
    right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx + margin, ploty])))])
    right_line_pts = np.hstack((right_line_window1, right_line_window2))

    cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
    cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
    out_img = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)

    out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
    out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]

    for index in range(img.shape[0]):
        cv2.circle(out_img, (int(left_fitx[index]), int(ploty[index])), 3, (255,255,0))
        cv2.circle(out_img, (int(right_fitx[index]), int(ploty[index])), 3, (255,255,0))

    return (left_fit, right_fit), (left_fitx, ploty), (right_fitx, ploty), out_img.astype(int)
    
#test
lines_fit, left_points, right_points, out_img = detect_similar_lines(test_warped, lines_fit)
cv2.imwrite(os.path.join('./',"test1.jpg".replace(".jpg","_line_detection.jpg")),out_img)

plt.imshow(out_img)
Out[146]:
<matplotlib.image.AxesImage at 0x7f3f9cafb7f0>

Curvature and position

In [147]:
def get_curvature_and_offset(x_left, x_right, img_shape):
    ploty = np.linspace(0, img_shape[0] - 1, img_shape[0])
    
    # Calculate top to bottom of left and right lines
    x_left = x_left[::-1]  
    x_right = x_right[::-1]
    
    # fit the second order polynomial
    left_fit = np.polyfit(ploty, x_left, 2)
    left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
    right_fit = np.polyfit(ploty, x_right, 2)
    right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]

    ym_per_pix = 30/720 
    xm_per_pix = 3.7/800

    # calculate the curvature
    y_eval = np.max(ploty)
    left_fit_cr = np.polyfit(ploty*ym_per_pix, x_left*xm_per_pix, 2)
    right_fit_cr = np.polyfit(ploty*ym_per_pix, x_right*xm_per_pix, 2)
    left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
    right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    
    # Find the cars center
    car_pos = (img_shape[1]//2)* xm_per_pix
        
    ## the center of the lane
    center_of_lane = ((x_left[-1] + x_right[-1])/2)* xm_per_pix
    
    curvature = (left_curverad+right_curverad)/2
        
    return curvature, (center_of_lane - car_pos)


curvature, x_distance = get_curvature_and_offset(x_left=left_points[0], x_right=right_points[0], img_shape = img.shape)

# Print the results
print('Curvature radius:', curvature, 'm')

print ('Car position from center:', x_distance, 'm.')


    
Curvature radius: 970.646453455653 m
Car position from center: 0.2700403529430102 m.

Warp Line back

In [148]:
def invert_lines(image, warped_image, left_points, right_points, inverse_transform):
    warp_zero = np.zeros_like(warped_image).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))

    
    left_fitx = left_points[0]
    right_fitx = right_points[0]
    ploty = left_points[1]

    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))

    cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))

    newwarp = cv2.warpPerspective(color_warp, inverse_transform, (image.shape[1], image.shape[0])) 

    return cv2.addWeighted(image, 1, newwarp, 0.3, 0)

#test

output = invert_lines(img, test_warped, left_points, right_points, Minv)

plt.imshow(output)
Out[148]:
<matplotlib.image.AxesImage at 0x7f3f9cac8e80>
In [149]:
def insert_calculations(img, left, right, x_per_pixel=3.7/800 ,y_per_pixel=25/720):
    
    curvature, x_offset = get_curvature_and_offset(x_left=left, x_right=right, img_shape = img.shape)

    out_img = img.copy()
    cv2.putText(out_img, 'Radius of curvature = {:.2f}(m)'.format(curvature), 
                (60, 60), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
    
    if x_offset > 0:
        cv2.putText(out_img, 'Vehicle is {:.2f} m Right of center.'.format(x_offset), 
                    (60, 160), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
    elif x_offset < 0:
        cv2.putText(out_img, 'Vehicle is {:.2f} m Left of center.'.format(abs(x_offset)), 
                    (60, 160), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
    else:
        cv2.putText(out_img, 'Vehicle is is centered.', 
                    (60, 160), cv2.FONT_HERSHEY_SIMPLEX, 2, (255,255,255), 5)
    
    return out_img


test = insert_calculations(output, left_points[0], right_points[0])
cv2.imwrite(os.path.join('./',"test1.jpg".replace(".jpg","_final.jpg")),test)
Out[149]:
True
In [150]:
class Pipeline:
    def __init__(self, images):

        # Calibrate camera
        self.images = images
        self.ret, self.mtx, self.dist, self.rvecs, self.tvecs = self.run_calibration()
        self.prev_lines = None
        
    def run_calibration(self):
        return calibrate_camera(self.images)
    
    def run_pipeline(self, img):
        img = cv2.undistort(img, mtx, dist, None, mtx)

        combined = threshold_pipeline(img)  

        combined_warped, M, Minv = warp_image(combined)
        
        try:        
            self.prev_lines, left_detections, right_detections, out_img = detect_similar_lines(combined_warped, self.prev_lines, return_image=True)
        except:
            self.prev_lines, left_detections, right_detections = detect_similar_lines(combined_warped, self.prev_lines)
        img_lane = invert_lines(img, combined_warped, left_detections, right_detections, Minv)
            
        result = insert_calculations(img_lane, left=left_detections[0], right=right_detections[0])
            
        return result
    

    def __call__(self, img):
        return self.run_pipeline(img)
        
In [151]:
input_video = './project_video.mp4'
output_video = './project_video_ouput.mp4'

video_clip = VideoFileClip(input_video)


pipeline = Pipeline("./camera_cal")

white_clip = video_clip.fl_image(pipeline)

%time white_clip.write_videofile(output_video, audio=False)
t:   0%|          | 0/1260 [00:00<?, ?it/s, now=None]
Moviepy - Building video ./project_video_ouput.mp4.
Moviepy - Writing video ./project_video_ouput.mp4

                                                                
Moviepy - Done !
Moviepy - video ready ./project_video_ouput.mp4
CPU times: user 16min 53s, sys: 35.9 s, total: 17min 29s
Wall time: 8min 4s
In [152]:
HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
</video>
""".format(output_video))
Out[152]:
In [153]:
input_video = './harder_challenge_video.mp4'
output_video = './harder_challenge_video_ouput.mp4'

video_clip = VideoFileClip(input_video)


process_image = ProcessImage("./camera_cal")

white_clip = video_clip.fl_image(process_image)

%time white_clip.write_videofile(output_video, audio=False)
t:   0%|          | 0/1199 [00:00<?, ?it/s, now=None]
Moviepy - Building video ./harder_challenge_video_ouput.mp4.
Moviepy - Writing video ./harder_challenge_video_ouput.mp4

                                                                
Moviepy - Done !
Moviepy - video ready ./harder_challenge_video_ouput.mp4
CPU times: user 17min, sys: 37.6 s, total: 17min 38s
Wall time: 8min 11s
In [154]:
HTML("""
<video width="640" height="360" controls>
  <source src="{0}">
</video>
""".format(output_video))
Out[154]: